PUQ Docker NextCloud deploy
工作流概述
这是一个包含44个节点的复杂工作流，作为 PUQcloud 开发的 WHMCS/WISECP Docker NextCloud 模块的 API 后端，用于自动化部署和管理 Docker NextCloud 实例（容器启停、磁盘挂载/卸载、日志与状态查询、用户与密码管理等）。
工作流源代码
{
"id": "d3xtaER6gl4aqLZR",
"meta": {
"instanceId": "ffb0782f8b2cf4278577cb919e0cd26141bc9ff8774294348146d454633aa4e3",
"templateCredsSetupCompleted": true
},
"name": "PUQ Docker NextCloud deploy",
"tags": [],
"nodes": [
{
"id": "dc9d4284-0ff7-4068-af3d-2b7f38451118",
"name": "If",
"type": "n8n-nodes-base.if",
"position": [
540,
920
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "b702e607-888a-42c9-b9a7-f9d2a64dfccd",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "=d01-test.uuq.pl"
},
{
"id": "8a6662a4-4539-4ab1-bd5b-46b0a0d6e023",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "d02-test.uuq.pl"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "b015bca6-fe71-4eb4-8e99-2904911c03b3",
"name": "Parametrs",
"type": "n8n-nodes-base.set",
"position": [
320,
920
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "370ddc4e-0fc0-48f6-9b30-ebdfba72c62f",
"name": "clients_dir",
"type": "string",
"value": "/opt/docker/clients"
},
{
"id": "92202bb8-6113-4bc5-9a29-79d238456df2",
"name": "mount_dir",
"type": "string",
"value": "/mnt"
},
{
"id": "baa52df2-9c10-42b2-939f-f05ea85ea2be",
"name": "screen_left",
"type": "string",
"value": "{{"
},
{
"id": "2b19ed99-2630-412a-98b6-4be44d35d2e7",
"name": "screen_right",
"type": "string",
"value": "}}"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "b0c5ccb8-0692-4bb0-99e1-769fde372e0f",
"name": "API",
"type": "n8n-nodes-base.webhook",
"position": [
0,
920
],
"webhookId": "4e8168b3-2cad-462a-9750-152986331ce2",
"parameters": {
"path": "docker-nextcloud",
"options": {},
"httpMethod": [
"POST"
],
"responseMode": "responseNode",
"authentication": "basicAuth",
"multipleMethods": true
},
"credentials": {
"httpBasicAuth": {
"id": "0gzq1np6ZmIrtK5o",
"name": "nextcloud"
}
},
"typeVersion": 2
},
{
"id": "bcaf7ce1-464a-492e-b7f5-50ba8e465171",
"name": "422-Invalid server domain",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
500,
1240
],
"parameters": {
"options": {
"responseCode": 422
},
"respondWith": "json",
"responseBody": "[{
\"status\": \"error\",
\"error\": \"Invalid server domain\"
}]"
},
"typeVersion": 1.1,
"alwaysOutputData": false
},
{
"id": "3c642087-bd6b-4996-890b-4d50fbca8c55",
"name": "Container Actions",
"type": "n8n-nodes-base.switch",
"position": [
940,
1740
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "start",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_start"
}
]
},
"renameOutput": true
},
{
"outputKey": "stop",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_stop"
}
]
},
"renameOutput": true
},
{
"outputKey": "mount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "727971bf-4218-41c1-9b07-22df4b947852",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_mount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "unmount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0c80b1d9-e7ca-4cf3-b3ac-b40fdf4dd8f8",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_unmount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "72a60c6b-5dc5-48db-8d3a-e083ffad6ae2",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_set_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "74eb2334-6176-46ef-b444-d99b439fea17",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_set_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_net",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "817ef082-a2d8-4b13-a8df-6e946878653b",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_net"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "396e6074-98ec-47df-956c-ce5c3b75e57e",
"name": "Container Stats",
"type": "n8n-nodes-base.switch",
"position": [
940,
1080
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "inspect",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_inspect"
}
]
},
"renameOutput": true
},
{
"outputKey": "stats",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_stats"
}
]
},
"renameOutput": true
},
{
"outputKey": "log",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "50ede522-af22-4b7a-b1fd-34b27fd3fadd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_log"
}
]
},
"renameOutput": true
},
{
"outputKey": "dependent_containers_information_stats",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "d3070310-d3c2-4200-9765-495cf69fa835",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "dependent_containers_information_stats"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_update_dns_record",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "dc17d6ad-4fa1-4006-8718-8188efa5f458",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_update_dns_record"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "d0084a58-b157-4635-955a-8638f348bf72",
"name": "Inspect",
"type": "n8n-nodes-base.set",
"position": [
1260,
760
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
INSPECT_JSON=\"{}\"
if sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then
INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")
fi
echo \"{\\"inspect\\": $INSPECT_JSON}\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "cec87c49-d7ea-4407-bc4c-21ea75b25baa",
"name": "Stat",
"type": "n8n-nodes-base.set",
"position": [
1260,
920
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
# Initialize empty container data
INSPECT_JSON=\"{}\"
STATS_JSON=\"{}\"
# Check if container is running
if sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then
# Get Docker inspect info in JSON (as raw string)
INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")
# Get Docker stats info in JSON (as raw string)
STATS_JSON=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME\")
STATS_JSON=${STATS_JSON:-'{}'}
fi
# Initialize disk info variables
MOUNT_USED=\"N/A\"
MOUNT_FREE=\"N/A\"
MOUNT_TOTAL=\"N/A\"
MOUNT_PERCENT=\"N/A\"
IMG_SIZE=\"N/A\"
IMG_PERCENT=\"N/A\"
DISK_STATS_IMG=\"N/A\"
# Check if mount directory exists and is accessible
if [ -d \"$MOUNT_DIR\" ]; then
if mount | grep -q \"$MOUNT_DIR\"; then
# Get disk usage for mounted directory
DISK_STATS_MOUNT=$(df -h \"$MOUNT_DIR\" | tail -n 1)
MOUNT_USED=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $3}')
MOUNT_FREE=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $4}')
MOUNT_TOTAL=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $2}')
MOUNT_PERCENT=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $5}')
fi
fi
# Check if image file exists
if [ -f \"$IMG_FILE\" ]; then
# Get disk usage for image file
IMG_SIZE=$(du -sh \"$IMG_FILE\" | awk '{print $1}')
fi
# Manually create a combined JSON object
FINAL_JSON=\"{\\"inspect\\": $INSPECT_JSON, \\"stats\\": $STATS_JSON, \\"disk\\": {\\"mounted\\": {\\"used\\": \\"$MOUNT_USED\\", \\"free\\": \\"$MOUNT_FREE\\", \\"total\\": \\"$MOUNT_TOTAL\\", \\"percent\\": \\"$MOUNT_PERCENT\\"}, \\"img_file\\": {\\"size\\": \\"$IMG_SIZE\\"}}}\"
# Output the result
echo \"$FINAL_JSON\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "80dcd9b2-f1f5-44c3-98e8-38dae5ad4edb",
"name": "Start",
"type": "n8n-nodes-base.set",
"position": [
1400,
1500
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
if ! df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"
fi
if sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then
handle_error \"{{ $('API').item.json.body.domain }} container is running\"
fi
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Start the Docker containers
if ! sudo docker-compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log)
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# Success
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "9cde27ca-4749-4660-9d46-d3161946b627",
"name": "Stop",
"type": "n8n-nodes-base.set",
"position": [
1400,
1660
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Check if Docker container is running
if ! sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then
handle_error \"{{ $('API').item.json.body.domain }} container is not running\"
fi
# Stop and remove the Docker containers (also remove associated volumes)
if ! sudo docker-compose -f \"$COMPOSE_DIR/docker-compose.yml\" down > /dev/null 2>&1; then
handle_error \"Failed to stop and remove docker-compose containers\"
fi
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "f957ffb7-ccb5-41b2-b89e-ef1a92942251",
"name": "Mount Disk",
"type": "n8n-nodes-base.set",
"position": [
1400,
1820
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Create necessary directories with permissions
sudo mkdir -p \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $MOUNT_DIR\"
sudo chmod 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
if df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is mounted to $MOUNT_DIR\"
fi
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add entry to /etc/fstab\"
fi
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "00cb7b5b-429e-494f-b2a9-1c0c45ac8d66",
"name": "Unmount Disk",
"type": "n8n-nodes-base.set",
"position": [
1400,
1980
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
if ! df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"
fi
# Remove the mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is mounted (using fstab)
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove the mount directory (if needed)
if ! sudo rm -rf \"$MOUNT_DIR\" > /dev/null 2>&1; then
handle_error \"Failed to remove $MOUNT_DIR\"
fi
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "49487b07-8b7f-48c4-b7d0-819336ce6691",
"name": "Log",
"type": "n8n-nodes-base.set",
"position": [
1420,
1040
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
LOGS_JSON=\"{}\"
# Function to return error in JSON format
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if the container exists
if ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then
handle_error \"Container $CONTAINER_NAME not found\"
fi
# Get logs of the container
LOGS=$(sudo docker logs --tail 1000 \"$CONTAINER_NAME\" 2>&1)
if [ $? -ne 0 ]; then
handle_error \"Failed to retrieve logs for $CONTAINER_NAME\"
fi
# Escape double quotes in logs for valid JSON
LOGS_ESCAPED=$(echo \"$LOGS\" | sed 's/\"/\\\"/g' | sed ':a;N;$!ba;s/\n/\\n/g')
# Format logs as JSON
LOGS_JSON=\"{\\"logs\\": \\"$LOGS_ESCAPED\\"}\"
echo \"$LOGS_JSON\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "f8dfb4a8-5887-4796-9d1e-f882947fe9e8",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
0,
0
],
"parameters": {
"color": 6,
"width": 639,
"height": 909,
"content": "## 👋 Welcome to PUQ Docker NextCloud deploy!
# Template for Docker NextCloud: API Backend for WHMCS/WISECP by PUQcloud
This is a Docker NextCloud template that creates an API backend for the WHMCS/WISECP module developed by PUQcloud.
## Setup Instructions
### 1. Configure API Webhook and SSH Access
- Create a Credential (Basic Auth) for the **Webhook API Block** in n8n.
- Create a Credential for **SSH access** to a server with Docker installed (**SSH Block**).
### 2. Install Required Packages on the Docker Server
Run the following command on your server:
```
apt-get install sqlite3 apache2-utils -y
```
### 3. Modify Template Parameters
In the **Parametrs** block of the template, update the following settings:
- `server_domain` – must match the domain of the WHMCS/WISECP Docker server (note: the allowed domains are hard-coded in the **If** node, not in the **Parametrs** block).
- `clients_dir` – directory where user data related to Docker and disks will be stored.
- `mount_dir` – default mount point for the container disk (recommended not to change).
**Do not modify** the following technical parameters:
- `screen_left`
- `screen_right`
## Additional Resources
- Full documentation: [https://doc.puq.info/books/docker-nextcloud-whmcs-module](https://doc.puq.info/books/docker-nextcloud-whmcs-module)
- WHMCS module: [https://puqcloud.com/whmcs-module-docker-nextcloud.php](https://puqcloud.com/whmcs-module-docker-nextcloud.php)
"
},
"typeVersion": 1
},
{
"id": "29bd957b-a5be-4a6e-81e3-ba7d88462d93",
"name": "Deploy-docker-compose",
"type": "n8n-nodes-base.set",
"position": [
1340,
20
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "docker-compose",
"type": "string",
"value": "=version: \"3.8\"
services:
{{ $('API').item.json.body.domain }}_nextcloud:
image: nextcloud:latest
container_name: {{ $('API').item.json.body.domain }}_nextcloud
environment:
NEXTCLOUD_ADMIN_USER: {{ $('API').item.json.body.nc_admin_user }}
NEXTCLOUD_ADMIN_PASSWORD: {{ $('API').item.json.body.nc_admin_password }}
NEXTCLOUD_TRUSTED_DOMAINS: {{ $('API').item.json.body.domain }}
MYSQL_PASSWORD: {{ $('API').item.json.body.mysql_password }}
MYSQL_DATABASE: {{ $('API').item.json.body.mysql_database }}
MYSQL_USER: {{ $('API').item.json.body.mysql_user }}
MYSQL_HOST: {{ $('API').item.json.body.domain }}_db
REDIS_HOST: {{ $('API').item.json.body.domain }}_redis
VIRTUAL_HOST: {{ $('API').item.json.body.domain }}
LETSENCRYPT_HOST: {{ $('API').item.json.body.domain }}
volumes:
- \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/config:/var/www/html/config\"
- \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/data:/var/www/html/data\"
- \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/html:/var/www/html\"
networks:
- nginx-proxy_web
depends_on:
- {{ $('API').item.json.body.domain }}_db
- {{ $('API').item.json.body.domain }}_redis
- {{ $('API').item.json.body.domain }}_collabora
mem_limit: \"{{ $('API').item.json.body.ram }}G\"
cpus: \"{{ $('API').item.json.body.cpu }}\"
{{ $('API').item.json.body.domain }}_db:
image: mariadb:11.4
container_name: {{ $('API').item.json.body.domain }}_db
environment:
MYSQL_ROOT_PASSWORD: {{ $('API').item.json.body.mysql_root_password }}
MYSQL_PASSWORD: {{ $('API').item.json.body.mysql_password }}
MYSQL_DATABASE: {{ $('API').item.json.body.mysql_database }}
MYSQL_USER: {{ $('API').item.json.body.mysql_user }}
volumes:
- \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/db:/var/lib/mysql\"
networks:
- nginx-proxy_web
mem_limit: \"{{ Number($('API').item.json.body.ram) / 2 }}G\"
cpus: \"{{ Number($('API').item.json.body.cpu) / 2 }}\"
{{ $('API').item.json.body.domain }}_redis:
image: redis:alpine
container_name: {{ $('API').item.json.body.domain }}_redis
networks:
- nginx-proxy_web
mem_limit: \"{{ Number($('API').item.json.body.ram) / 4 }}G\"
cpus: \"{{ Number($('API').item.json.body.cpu) / 4 }}\"
{{ $('API').item.json.body.domain }}_collabora:
image: collabora/code
container_name: {{ $('API').item.json.body.domain }}_collabora
environment:
- domain={{ $('API').item.json.body.office_domain_escaped }}:443
- server_name=office.{{ $('API').item.json.body.domain }}
- username={{ $('API').item.json.body.mysql_user }}
- password={{ $('API').item.json.body.mysql_password }}
- \"dictionaries=ru_RU uk_UA pl_PL en\"
- \"extra_params=--o:ssl.enable=true --o:ssl.termination=true --o:net.proto=https --o:ssl.le=true --o:storage.wopi.host=https://{{ $('API').item.json.body.domain }}\"
- VIRTUAL_HOST=office.{{ $('API').item.json.body.domain }}
- LETSENCRYPT_HOST=office.{{ $('API').item.json.body.domain }}
- VIRTUAL_PROTO=https
- VIRTUAL_PORT=9980
cap_add:
- MKNOD
- SYS_ADMIN
extra_hosts:
- \"{{ $('API').item.json.body.domain }}:77.87.125.201\"
dns:
- 8.8.8.8
- 8.8.4.4
networks:
- nginx-proxy_web
mem_limit: \"{{ Number($('API').item.json.body.ram) }}G\"
cpus: \"{{ Number($('API').item.json.body.cpu) / 2 }}\"
networks:
nginx-proxy_web:
external: true
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "4243c90b-de8a-4931-972b-5f700edb09d4",
"name": "Version",
"type": "n8n-nodes-base.set",
"position": [
1380,
2640
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Define the container name dynamically using an API call
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
VERSION_JSON=\"{}\"
# Function to handle errors and return a JSON-formatted message
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if the container exists by searching for its name in the list of all Docker containers
if ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then
handle_error \"Container $CONTAINER_NAME not found\"
fi
# Retrieve the Nextcloud status as a JSON response from the container
# The '-u 33' option ensures that the command is executed as the Nextcloud user (www-data)
NEXTCLOUD_STATUS=$(sudo docker exec -u 33 \"$CONTAINER_NAME\" php occ status --output=json 2>/dev/null)
# Validate if the command was executed successfully and if the output is not empty
if [ $? -ne 0 ] || [ -z \"$NEXTCLOUD_STATUS\" ]; then
handle_error \"Failed to retrieve Nextcloud status for $CONTAINER_NAME\"
fi
# Extract the Nextcloud version string from the JSON response
VERSION=$(echo \"$NEXTCLOUD_STATUS\" | jq -r '.versionstring')
# Ensure that a valid version string was extracted
if [ -z \"$VERSION\" ]; then
handle_error \"Failed to parse Nextcloud version from response\"
fi
# Construct a JSON-formatted output containing the Nextcloud version
VERSION_JSON=\"{\\"version\\": \\"$VERSION\\"}\"
# Print the JSON result
echo \"$VERSION_JSON\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "4f13c4f2-82dd-478f-915b-247a071db107",
"name": "Users",
"type": "n8n-nodes-base.set",
"position": [
1380,
2780
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Define the container name dynamically using an API call
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
USERS_JSON=\"{}\"
# Function to handle errors and return a JSON-formatted message
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if the container exists by searching for its name in the list of all Docker containers
if ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then
handle_error \"Container $CONTAINER_NAME not found\"
fi
# Retrieve the list of Nextcloud users and reformat it into a proper JSON array
USERS=$(sudo docker exec -u 33 \"$CONTAINER_NAME\" php occ user:list --output=json 2>/dev/null | jq -c 'to_entries | map({username: .key, displayname: .value})')
# Validate if the command executed successfully and output is not empty
if [ $? -ne 0 ] || [ -z \"$USERS\" ]; then
handle_error \"Failed to retrieve users from Nextcloud\"
fi
# Construct a JSON-formatted output containing all retrieved users
USERS_JSON=\"{\\"users\\": $USERS}\"
# Print the JSON result
echo \"$USERS_JSON\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "6d385bc7-01f1-4d42-b16e-a2e45927ef7f",
"name": "Change Password",
"type": "n8n-nodes-base.set",
"position": [
1380,
2960
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
NC_USER=\"{{ $('API').item.json.body.user_email }}\"
NEW_PASSWORD=\"{{ $('API').item.json.body.password }}\"
# Function to output error in JSON format and exit with code 1
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if container name is provided
if [ -z \"$CONTAINER_NAME\" ]; then
handle_error \"No container name provided\"
fi
# Check if Nextcloud username is provided
if [ -z \"$NC_USER\" ]; then
handle_error \"No Nextcloud user provided\"
fi
# Check if password is provided
if [ -z \"$NEW_PASSWORD\" ]; then
handle_error \"No password provided\"
fi
# Run command in container
# -u 33 => as UID 33 (often www-data in Nextcloud)
# -e OC_PASS=\"$NEW_PASSWORD\" => pass password through environment to container
# php occ user:resetpassword --password-from-env \"$NC_USER\"
# returns 0 if successful
OUTPUT=$( sudo docker exec -u 33 \
-e OC_PASS=\"$NEW_PASSWORD\" \
\"$CONTAINER_NAME\" \
php occ user:resetpassword --password-from-env \"$NC_USER\" 2>&1 )
# Check return code
if [ $? -ne 0 ]; then
handle_error \"Failed to reset password. Output: $OUTPUT\"
fi
echo \"{\\"status\\": \\"success\\"}\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "dd283191-a5cd-4d29-8c2d-0ef42b63f69c",
"name": "NextCloud",
"type": "n8n-nodes-base.switch",
"position": [
920,
2620
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "version",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_version"
}
]
},
"renameOutput": true
},
{
"outputKey": "users",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_users"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_password",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "7c862a6f-5df1-499c-b9c6-9b266e2bebec",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_password"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "9f5e3d3e-4f6d-4967-aefe-b953c5c3418b",
"name": "nginx",
"type": "n8n-nodes-base.set",
"position": [
1080,
140
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "main",
"type": "string",
"value": "=# Increase max body size for large file uploads
client_max_body_size 50000M;
# Proxy headers
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection \"upgrade\";
# Timeouts
proxy_read_timeout 600s;
proxy_send_timeout 600s;
send_timeout 600s;
# Additional optimizations
proxy_buffering off;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
proxy_connect_timeout 600s;
"
},
{
"id": "6507763a-21d4-4ff0-84d2-5dc9d21b7430",
"name": "main_location",
"type": "string",
"value": "="
},
{
"id": "d00aa07a-0641-43ef-8fd2-5fb9ef62e313",
"name": "office",
"type": "string",
"value": "=server_name office.{{ $('API').item.json.body.domain }};
# static files
location ^~ /browser {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Host $host;
}
# WOPI discovery URL
location ^~ /hosting/discovery {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Host $host;
}
# Capabilities
location ^~ /hosting/capabilities {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Host $host;
}
# main websocket
location ~ ^/cool/(.*)/ws$ {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection \"Upgrade\";
proxy_set_header Host $host;
proxy_read_timeout 36000s;
}
# download, presentation and image upload
location ~ ^/(c|l)ool {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Host $host;
}
# Admin Console websocket
location ^~ /cool/adminws {
proxy_pass https://office.{{ $('API').item.json.body.domain }};
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection \"Upgrade\";
proxy_set_header Host $host;
proxy_read_timeout 36000s;
}
"
},
{
"id": "c00fb803-8b9f-4aca-a1b1-2e3da42fc8d1",
"name": "office_location",
"type": "string",
"value": "="
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "fa40012b-0e58-4d6c-af19-b9dd6c72386d",
"name": "Test Connection",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
-40
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Function to log an error, print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Check if Docker is installed
if ! command -v docker &> /dev/null; then
handle_error \"Docker is not installed\"
fi
# Check if Docker service is running
if ! systemctl is-active --quiet docker; then
handle_error \"Docker service is not running\"
fi
# Check if nginx-proxy container is running
if ! sudo docker ps --filter \"name=nginx-proxy\" --filter \"status=running\" -q > /dev/null; then
handle_error \"nginx-proxy container is not running\"
fi
# Check if letsencrypt-nginx-proxy-companion container is running
if ! sudo docker ps --filter \"name=letsencrypt-nginx-proxy-companion\" --filter \"status=running\" -q > /dev/null; then
handle_error \"letsencrypt-nginx-proxy-companion container is not running\"
fi
# If everything is successful
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "12240691-bcbe-407c-b53c-89cf84bc190f",
"name": "ChangePackage",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
840
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ JSON.stringify($('Deploy-docker-compose').item.json['docker-compose']).base64Encode() }}'
NGINX_MAIN_TEXT='{{ JSON.stringify($('nginx').item.json['main']).base64Encode() }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['main_location']).base64Encode() }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
NGINX_OFFICE_TEXT='{{ JSON.stringify($('nginx').item.json['office']).base64Encode() }}'
NGINX_OFFICE_FILE=\"$NGINX_DIR/office.$DOMAIN\"
VHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"
NGINX_OFFICE_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['office_location']).base64Encode() }}'
NGINX_OFFICE_LOCATION_FILE=\"$NGINX_DIR/office.$DOMAIN\"_location
VHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
# Get nginx-proxy IP address before installing Nextcloud Office
get_proxy_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' nginx-proxy)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] nginx-proxy IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve nginx-proxy IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve nginx-proxy IP\"
fi
echo \"[DEBUG] Detected nginx-proxy IP: $ip\" >> \"$STATUS_FILE\"
echo \"$ip\"
}
# Get the IP address of Nextcloud Office
get_office_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' \"$DOMAIN\"_collabora)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] office IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve office IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve office IP\"
fi
# Convert IP to subnet by replacing the last octet with 0 and adding /24
local subnet=$(echo \"$ip\" | sed 's/\.[0-9]*$/.0\/24/')
echo \"[DEBUG] Detected office subnet: $subnet\" >> \"$STATUS_FILE\"
echo \"$subnet\"
}
# Check if the compose file exists before stopping the container
if [ -f \"$COMPOSE_FILE\" ]; then
sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1 || handle_error \"Failed to stop containers\"
else
handle_error \"docker-compose.yml not found\"
fi
# Unmount the image if it is currently mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Create docker-compose.yml file
echo -e \"$DOCKER_COMPOSE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo -e \"$NGINX_MAIN_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo -e \"$NGINX_MAIN_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
echo -e \"$NGINX_OFFICE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_FILE\"
echo -e \"$NGINX_OFFICE_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_LOCATION_FILE\"
# Resize or extend the disk image to match DISK_SIZE
if [ -f \"$IMG_FILE\" ]; then
DESIRED_SIZE_BYTES=$((DISK_SIZE * 1024 * 1024 * 1024))
CURRENT_SIZE_BYTES=$(stat -c %s \"$IMG_FILE\")
# Expand or shrink as needed
if [ \"$CURRENT_SIZE_BYTES\" -lt \"$DESIRED_SIZE_BYTES\" ]; then
# echo \"[INFO] Expanding image to $DISK_SIZE GB...\"
sudo truncate -s \"$DESIRED_SIZE_BYTES\" \"$IMG_FILE\" || handle_error \"Failed to expand $IMG_FILE\" 2>/dev/null
LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device\"
sudo e2fsck -fy \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"Filesystem check failed\" ; } 2>/dev/null
sudo resize2fs \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs after expand failed\" ; } 2>/dev/null
sudo losetup -d \"$LOOP_DEV\" 2>/dev/null
elif [ \"$CURRENT_SIZE_BYTES\" -gt \"$DESIRED_SIZE_BYTES\" ]; then
# echo \"[INFO] Shrinking image to $DISK_SIZE GB...\"
LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device\"
sudo e2fsck -fy \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"Filesystem check failed\" ; } 2>/dev/null
sudo resize2fs -M \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs -M failed\" ; } 2>/dev/null
BLOCKS=$(sudo tune2fs -l \"$LOOP_DEV\" | grep '^Block count:' | awk '{print $3}')
BLOCK_SIZE=$(sudo tune2fs -l \"$LOOP_DEV\" | grep '^Block size:' | awk '{print $3}')
MIN_BYTES=$((BLOCKS * BLOCK_SIZE))
sudo losetup -d \"$LOOP_DEV\" 2>/dev/null
if [ \"$DESIRED_SIZE_BYTES\" -lt \"$MIN_BYTES\" ]; then
handle_error \"DISK_SIZE too small. Minimum size is $((MIN_BYTES / 1024 / 1024 / 1024)) GB\"
fi
sudo truncate -s \"$DESIRED_SIZE_BYTES\" \"$IMG_FILE\" || handle_error \"Failed to truncate to desired size\"
LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device (after shrink)\"
sudo resize2fs \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs after shrink failed\" ; } 2>/dev/null
sudo losetup -d \"$LOOP_DEV\" 2>/dev/null
fi
# Remove the old line from /etc/fstab (if it exists) and add it again
sudo sed -i \"\|$IMG_FILE|d\" /etc/fstab
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to update /etc/fstab\"
# Create the folder if it doesn't exist
sudo mkdir -p \"$MOUNT_DIR\"
sudo chmod 777 \"$MOUNT_DIR\"
# Try to mount manually
if ! sudo mount \"$MOUNT_DIR\"; then
echo \"[WARN] mount failed, trying manual mount with loop\"
LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\") || handle_error \"Failed to setup loop device (manual)\"
sudo mount -t ext4 \"$LOOP_DEV\" \"$MOUNT_DIR\" || {
sudo losetup -d \"$LOOP_DEV\"
handle_error \"Manual mount failed\"
}
fi
else
handle_error \"Disk image $IMG_FILE does not exist\"
fi
# Mount the disk only if it is not already mounted
if ! mount | grep -q \"$MOUNT_DIR\"; then
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
fi
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
sudo cp -f \"$NGINX_OFFICE_FILE\" \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_FILE to $VHOST_OFFICE_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_FILE\"
sudo cp -f \"$NGINX_OFFICE_LOCATION_FILE\" \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_LOCATION_FILE to $VHOST_OFFICE_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_LOCATION_FILE\"
# Start Docker containers using docker-compose
if ! sudo docker compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# --- Function that installs Nextcloud Office (Collabora) in the background ---
install_nextcloud_office() {
MAX_RETRIES=60
COUNTER=0
# 1) Wait until \"installed: true\" in occ status
while true; do
STATUS_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ status 2>&1)\"
if echo \"$STATUS_OUTPUT\" | grep -q \"installed: true\"; then
echo \"[OfficeSetup] Nextcloud reports installed: true. Proceeding...\" >> \"$STATUS_FILE\"
break
else
echo \"[OfficeSetup] [$COUNTER/$MAX_RETRIES] Nextcloud not fully installed yet, waiting...\" >> \"$STATUS_FILE\"
sleep 2
((COUNTER++))
if [ $COUNTER -ge $MAX_RETRIES ]; then
echo \"[OfficeSetup] Nextcloud did not report 'installed: true' within time limit. Skipping Office install.\" >> \"$STATUS_FILE\"
return
fi
fi
done
# Get the nginx-proxy IP
PROXY_IP=$(get_proxy_ip)
echo \"[OfficeSetup] Detected nginx-proxy IP: $PROXY_IP\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Nextcloud config
echo \"[OfficeSetup] Setting overwrite protocol/host/cli.url in Nextcloud config...\" >> \"$STATUS_FILE\"
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwriteprotocol --value=https 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwritehost --value=\"$DOMAIN\" 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwrite.cli.url --value=\"https://$DOMAIN\" 2>&1
# Add the nginx-proxy IP to the trusted_proxies list
echo \"[OfficeSetup] Adding nginx-proxy IP to trusted_proxies...\" >> \"$STATUS_FILE\"
# Register the reverse proxy IP as a trusted proxy in Nextcloud
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set trusted_proxies 0 --value=\"$PROXY_IP\" 2>&1
echo \"[OfficeSetup] Installing Nextcloud Office (richdocuments)...\" >> \"$STATUS_FILE\"
# 2) Install the richdocuments app
INSTALL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:install richdocuments 2>&1 || echo \"[OfficeSetup] App already installed\")\"
echo \"[OfficeSetup] app:install richdocuments => $INSTALL_OUTPUT\" >> \"$STATUS_FILE\"
# 3) Set the Collabora Online URL in Nextcloud
WOPI_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_url --value=\"https://office.$DOMAIN/\" 2>&1)\"
echo \"[OfficeSetup] wopi_url => $WOPI_OUTPUT\" >> \"$STATUS_FILE\"
# 4) Enable the app
ENABLE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:enable richdocuments 2>&1)\"
echo \"[OfficeSetup] app:enable richdocuments => $ENABLE_OUTPUT\" >> \"$STATUS_FILE\"
# 5) Allow local remote servers (Fix for Collabora access issues)
ALLOW_LOCAL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set allow_local_remote_servers --value=true --type=bool 2>&1)\"
echo \"[OfficeSetup] allow_local_remote_servers => $ALLOW_LOCAL_OUTPUT\" >> \"$STATUS_FILE\"
# 6) Apply changes by running maintenance repair
REPAIR_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair 2>&1)\"
echo \"[OfficeSetup] maintenance:repair => $REPAIR_OUTPUT\" >> \"$STATUS_FILE\"
# 7) Activate Collabora Online configuration
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ richdocuments:activate-config 2>&1)\"
echo \"[OfficeSetup] richdocuments:activate-config => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
# 8) Refresh cache by scanning all files
SCAN_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ files:scan --all 2>&1)\"
echo \"[OfficeSetup] files:scan --all => $SCAN_OUTPUT\" >> \"$STATUS_FILE\"
# 9) Double-check if the app is enabled
APP_LIST=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:list 2>&1)\"
echo \"[OfficeSetup] occ app:list => $APP_LIST\" >> \"$STATUS_FILE\"
# 10) Perform the migrations
MIGRATION_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair --include-expensive 2>&1)\"
echo \"[OfficeSetup] maintenance:repair --include-expensive => $MIGRATION_OUTPUT\" >> \"$STATUS_FILE\"
if echo \"$APP_LIST\" | grep -q \"richdocuments: enabled\"; then
echo \"[OfficeSetup] Nextcloud Office successfully installed and configured!\" >> \"$STATUS_FILE\"
else
echo \"[OfficeSetup] Nextcloud Office installation failed or not enabled.\" >> \"$STATUS_FILE\"
fi
OFFICE_IP_SUBNET=$(get_office_ip)
echo \"[OfficeSetup] Detected office IP: $OFFICE_IP_SUBNET\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Collabora config
# 1) Collabora
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_allowlist --value=\"$OFFICE_IP_SUBNET\" 2>&1)\"
echo \"[OfficeSetup] richdocuments:wopi_allowlist => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
}
# Export DOMAIN so it's visible to the function in background
export DOMAIN
export CONTAINER_NAME
# Export the get_proxy_ip function for visibility in nohup
export -f get_proxy_ip
# Export the get_office_ip function for visibility in nohup
export -f get_office_ip
# Run the installation in the background, no blocking
nohup bash -c \"$(
declare -f install_nextcloud_office
echo 'install_nextcloud_office'
)\" > /tmp/office_install.log 2>&1 &
# Update status file
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "5c01e300-9eb2-42db-b609-1ddb1d0140e7",
"name": "Terminated",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
660
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
CRON_SCRIPT=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN/cron.sh\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
VHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"
VHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"error: $1\"
exit 1
}
# Function to remove Nextcloud cron job
remove_nextcloud_cron() {
echo \"[CRON] Removing Nextcloud cron job...\" >> /dev/null
# Remove from crontab
crontab -l 2>/dev/null | grep -v \"$CONTAINER_NAME\" | crontab -
echo \"[CRON] Nextcloud cron job removed successfully!\" >> /dev/null
}
# Stop and remove the Docker containers
if [ -f \"$COMPOSE_FILE\" ]; then
sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1
fi
# Remove the mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is still mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove all related directories and files
for item in \"$COMPOSE_DIR\" \"$VHOST_MAIN_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" \"$VHOST_OFFICE_FILE\" \"$VHOST_OFFICE_LOCATION_FILE\"; do
if [ -e \"$item\" ]; then
sudo rm -rf \"$item\" || handle_error \"Failed to remove $item\"
fi
done
export CONTAINER_NAME
# Remove the cron after execution
remove_nextcloud_cron
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "264dac81-0eda-4a49-b209-c7dda4dd649d",
"name": "Unsuspend",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
480
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ JSON.stringify($('Deploy-docker-compose').item.json[\"docker-compose\"]).base64Encode() }}'
NGINX_MAIN_TEXT='{{ JSON.stringify($('nginx').item.json['main']).base64Encode() }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['main_location']).base64Encode() }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
NGINX_OFFICE_TEXT='{{ JSON.stringify($('nginx').item.json['office']).base64Encode() }}'
NGINX_OFFICE_FILE=\"$NGINX_DIR/office.$DOMAIN\"
VHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"
NGINX_OFFICE_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['office_location']).base64Encode() }}'
NGINX_OFFICE_LOCATION_FILE=\"$NGINX_DIR/office.$DOMAIN\"_location
VHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
# Get nginx-proxy IP address before installing Nextcloud Office
get_proxy_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' nginx-proxy)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] nginx-proxy IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve nginx-proxy IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve nginx-proxy IP\"
fi
echo \"[DEBUG] Detected nginx-proxy IP: $ip\" >> \"$STATUS_FILE\"
echo \"$ip\"
}
# Get the IP address of Nextcloud Office
get_office_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' \"$DOMAIN\"_collabora)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] office IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve office IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve office IP\"
fi
# Convert IP to subnet by replacing the last octet with 0 and adding /24
local subnet=$(echo \"$ip\" | sed 's/\.[0-9]*$/.0\/24/')
echo \"[DEBUG] Detected office subnet: $subnet\" >> \"$STATUS_FILE\"
echo \"$subnet\"
}
# Create necessary directories with permissions
for dir in \"$COMPOSE_DIR\" \"$NGINX_DIR\" \"$MOUNT_DIR\"; do
sudo mkdir -p \"$dir\" || handle_error \"Failed to create $dir\"
sudo chmod -R 777 \"$dir\" || handle_error \"Failed to set permissions on $dir\"
done
# Check if the image is already mounted using fstab
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add fstab entry for $IMG_FILE\"
fi
# Apply the fstab changes and mount the image
if ! mount | grep -q \"$MOUNT_DIR\"; then
sudo mount -a || handle_error \"Failed to mount image using fstab\"
fi
# Create docker-compose.yml file
echo -e \"$DOCKER_COMPOSE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo -e \"$NGINX_MAIN_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo -e \"$NGINX_MAIN_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
echo -e \"$NGINX_OFFICE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_FILE\"
echo -e \"$NGINX_OFFICE_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_LOCATION_FILE\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
sudo cp -f \"$NGINX_OFFICE_FILE\" \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_FILE to $VHOST_OFFICE_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_FILE\"
sudo cp -f \"$NGINX_OFFICE_LOCATION_FILE\" \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_LOCATION_FILE to $VHOST_OFFICE_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_LOCATION_FILE\"
# Change to the compose directory
cd \"$COMPOSE_DIR\" || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Start Docker containers using docker-compose
> error.log
if ! sudo docker compose up -d > error.log 2>&1; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# Function to add Nextcloud cron job
add_nextcloud_cron() {
echo \"[CRON] Adding Nextcloud cron job...\" >> /dev/null
# Create cron command
CRON_CMD=\"*/5 * * * * sudo docker exec -u www-data $CONTAINER_NAME php cron.php --force\"
# Add to crontab (remove old if exists)
(crontab -l 2>/dev/null | grep -v \"$CONTAINER_NAME\"; echo \"$CRON_CMD\") | crontab -
echo \"[CRON] Nextcloud cron job added successfully!\" >> /dev/null
}
# Function to remove Nextcloud cron job
remove_nextcloud_cron() {
echo \"[CRON] Removing Nextcloud cron job...\" >> /dev/null
# Remove from crontab
crontab -l 2>/dev/null | grep -v \"$CONTAINER_NAME\" | crontab -
echo \"[CRON] Nextcloud cron job removed successfully!\" >> /dev/null
}
# --- Function that installs Nextcloud Office (Collabora) in the background ---
install_nextcloud_office() {
MAX_RETRIES=60
COUNTER=0
# 1) Wait until \"installed: true\" in occ status
while true; do
STATUS_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ status 2>&1)\"
if echo \"$STATUS_OUTPUT\" | grep -q \"installed: true\"; then
echo \"[OfficeSetup] Nextcloud reports installed: true. Proceeding...\" >> \"$STATUS_FILE\"
break
else
echo \"[OfficeSetup] [$COUNTER/$MAX_RETRIES] Nextcloud not fully installed yet, waiting...\" >> \"$STATUS_FILE\"
sleep 2
((COUNTER++))
if [ $COUNTER -ge $MAX_RETRIES ]; then
echo \"[OfficeSetup] Nextcloud did not report 'installed: true' within time limit. Skipping Office install.\" >> \"$STATUS_FILE\"
return
fi
fi
done
# Get the nginx-proxy IP
PROXY_IP=$(get_proxy_ip)
echo \"[OfficeSetup] Detected nginx-proxy IP: $PROXY_IP\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Nextcloud config
echo \"[OfficeSetup] Setting overwrite protocol/host/cli.url in Nextcloud config...\" >> \"$STATUS_FILE\"
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwriteprotocol --value=https 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwritehost --value=\"$DOMAIN\" 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwrite.cli.url --value=\"https://$DOMAIN\" 2>&1
# Add the nginx-proxy IP to the trusted_proxies list
echo \"[OfficeSetup] Adding nginx-proxy IP to trusted_proxies...\" >> \"$STATUS_FILE\"
# Register the reverse proxy IP as a trusted proxy in Nextcloud
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set trusted_proxies 0 --value=\"$PROXY_IP\" 2>&1
echo \"[OfficeSetup] Installing Nextcloud Office (richdocuments)...\" >> \"$STATUS_FILE\"
# 2) Install the richdocuments app
INSTALL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:install richdocuments 2>&1 || echo \"[OfficeSetup] App already installed\")\"
echo \"[OfficeSetup] app:install richdocuments => $INSTALL_OUTPUT\" >> \"$STATUS_FILE\"
# 3) Set the Collabora Online URL in Nextcloud
WOPI_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_url --value=\"https://office.$DOMAIN/\" 2>&1)\"
echo \"[OfficeSetup] wopi_url => $WOPI_OUTPUT\" >> \"$STATUS_FILE\"
# 4) Enable the app
ENABLE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:enable richdocuments 2>&1)\"
echo \"[OfficeSetup] app:enable richdocuments => $ENABLE_OUTPUT\" >> \"$STATUS_FILE\"
# 5) Allow local remote servers (Fix for Collabora access issues)
ALLOW_LOCAL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set allow_local_remote_servers --value=true --type=bool 2>&1)\"
echo \"[OfficeSetup] allow_local_remote_servers => $ALLOW_LOCAL_OUTPUT\" >> \"$STATUS_FILE\"
# 6) Apply changes by running maintenance repair
REPAIR_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair 2>&1)\"
echo \"[OfficeSetup] maintenance:repair => $REPAIR_OUTPUT\" >> \"$STATUS_FILE\"
# 7) Activate Collabora Online configuration
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ richdocuments:activate-config 2>&1)\"
echo \"[OfficeSetup] richdocuments:activate-config => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
# 8) Refresh cache by scanning all files
SCAN_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ files:scan --all 2>&1)\"
echo \"[OfficeSetup] files:scan --all => $SCAN_OUTPUT\" >> \"$STATUS_FILE\"
# 9) Double-check if the app is enabled
APP_LIST=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:list 2>&1)\"
echo \"[OfficeSetup] occ app:list => $APP_LIST\" >> \"$STATUS_FILE\"
# 10) Perform the migrations
MIGRATION_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair --include-expensive 2>&1)\"
echo \"[OfficeSetup] maintenance:repair --include-expensive => $MIGRATION_OUTPUT\" >> \"$STATUS_FILE\"
if echo \"$APP_LIST\" | grep -q \"richdocuments: enabled\"; then
echo \"[OfficeSetup] Nextcloud Office successfully installed and configured!\" >> \"$STATUS_FILE\"
else
echo \"[OfficeSetup] Nextcloud Office installation failed or not enabled.\" >> \"$STATUS_FILE\"
fi
OFFICE_IP_SUBNET=$(get_office_ip)
echo \"[OfficeSetup] Detected office IP: $OFFICE_IP_SUBNET\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Collabora config
# 1) Collabora
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_allowlist --value=\"$OFFICE_IP_SUBNET\" 2>&1)\"
echo \"[OfficeSetup] richdocuments:wopi_allowlist => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
# 2) Add Nextcloud cron job
add_nextcloud_cron
}
# Export DOMAIN so it's visible to the function in background
export DOMAIN
export CONTAINER_NAME
# Export the get_proxy_ip function for visibility in nohup
export -f get_proxy_ip
# Export the get_office_ip function for visibility in nohup
export -f get_office_ip
# Export the add_nextcloud_cron function for visibility in nohup
export -f add_nextcloud_cron
# Run the installation in the background
nohup bash -c \"$(
declare -f install_nextcloud_office
echo 'install_nextcloud_office'
)\" > /tmp/office_install.log 2>&1 &
# If everything is successful, update the status file and print success message
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "616b6f8b-707f-473f-8065-f3e1623ece2c",
"name": "Suspend",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
320
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
VHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"
VHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location
# Function to log an error, write to status file, and print to office
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
# Stop and remove Docker containers (also remove associated volumes)
if [ -f \"$COMPOSE_FILE\" ]; then
if ! sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1; then
handle_error \"Failed to stop and remove docker-compose containers\"
fi
else
echo \"Warning: docker-compose.yml not found, skipping container stop.\"
fi
# Remove mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove the mount directory
if [ -d \"$MOUNT_DIR\" ]; then
sudo rm -rf \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to remove $MOUNT_DIR\"
fi
# Remove NGINX configuration files
if [ -f \"$VHOST_MAIN_FILE\" ]; then sudo rm -f \"$VHOST_MAIN_FILE\" || handle_error \"Failed to remove $VHOST_MAIN_FILE\"; else echo \"Warning: $VHOST_MAIN_FILE not found.\"; fi
if [ -f \"$VHOST_MAIN_LOCATION_FILE\" ]; then sudo rm -f \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to remove $VHOST_MAIN_LOCATION_FILE\"; else echo \"Warning: $VHOST_MAIN_LOCATION_FILE not found.\"; fi
if [ -f \"$VHOST_OFFICE_FILE\" ]; then sudo rm -f \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to remove $VHOST_OFFICE_FILE\"; else echo \"Warning: $VHOST_OFFICE_FILE not found.\"; fi
if [ -f \"$VHOST_OFFICE_LOCATION_FILE\" ]; then sudo rm -f \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to remove $VHOST_OFFICE_LOCATION_FILE\"; else echo \"Warning: $VHOST_OFFICE_LOCATION_FILE not found.\"; fi
# Update status
echo \"suspended\" | sudo tee \"$STATUS_FILE\" > /dev/null
# Success
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "e09ef109-d4bd-4d2f-acad-a442854bc299",
"name": "Deploy",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
160
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ JSON.stringify($('Deploy-docker-compose').item.json['docker-compose']).base64Encode() }}'
NGINX_MAIN_TEXT='{{ JSON.stringify($('nginx').item.json['main']).base64Encode() }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['main_location']).base64Encode() }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
NGINX_OFFICE_TEXT='{{ JSON.stringify($('nginx').item.json['office']).base64Encode() }}'
NGINX_OFFICE_FILE=\"$NGINX_DIR/office.$DOMAIN\"
VHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"
NGINX_OFFICE_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['office_location']).base64Encode() }}'
NGINX_OFFICE_LOCATION_FILE=\"$NGINX_DIR/office.$DOMAIN\"_location
VHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Get nginx-proxy IP address before installing Nextcloud Office
get_proxy_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' nginx-proxy)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] nginx-proxy IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve nginx-proxy IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve nginx-proxy IP\"
fi
echo \"[DEBUG] Detected nginx-proxy IP: $ip\" >> \"$STATUS_FILE\"
echo \"$ip\"
}
# Get the IP address of Nextcloud Office
get_office_ip() {
local ip=\"\"
local retries=10 # Try a few times
local count=0
while [[ -z \"$ip\" && $count -lt $retries ]]; do
ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' \"$DOMAIN\"_collabora)
if [[ -z \"$ip\" ]]; then
echo \"[DEBUG] office IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"
sleep 2 # Wait a bit before retrying
fi
((count++))
done
if [[ -z \"$ip\" ]]; then
echo \"[ERROR] Failed to retrieve office IP after $retries attempts!\" >> \"$STATUS_FILE\"
handle_error \"Failed to retrieve office IP\"
fi
# Convert IP to subnet by replacing the last octet with 0 and adding /24
local subnet=$(echo \"$ip\" | sed 's/\.[0-9]*$/.0\/24/')
echo \"[DEBUG] Detected office subnet: $subnet\" >> \"$STATUS_FILE\"
echo \"$subnet\"
}
# Function to handle errors: write to the status file and print the message to office
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null # Write error to the status file
echo \"error: $1\" # Print the error message to the office
exit 1 # Exit the script with an error code
}
# Check if the directory already exists. If yes, exit with an error.
if [ -d \"$COMPOSE_DIR\" ]; then
echo \"error: Directory $COMPOSE_DIR already exists\"
exit 1
fi
# Create necessary directories with permissions
sudo mkdir -p \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_DIR\"
sudo mkdir -p \"$NGINX_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_DIR\"
sudo mkdir -p \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $MOUNT_DIR\"
# Set permissions on the created directories
sudo chmod -R 777 \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $COMPOSE_DIR\"
sudo chmod -R 777 \"$NGINX_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $NGINX_DIR\"
sudo chmod -R 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
# Create docker-compose.yml file
echo -e \"$DOCKER_COMPOSE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo -e \"$NGINX_MAIN_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo -e \"$NGINX_MAIN_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
echo -e \"$NGINX_OFFICE_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_FILE\"
echo -e \"$NGINX_OFFICE_LOCATION_TEXT\" | base64 --decode | sed 's/\\n/\n/g' | sed 's/\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_LOCATION_FILE\"
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Create data.img file if it doesn't exist
if [ ! -f \"$IMG_FILE\" ]; then
sudo fallocate -l \"$DISK_SIZE\"G \"$IMG_FILE\" > /dev/null 2>&1 || sudo truncate -s \"$DISK_SIZE\"G \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $IMG_FILE\"
sudo mkfs.ext4 \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to format $IMG_FILE\" # Format the image as ext4
sync # Synchronize the data to disk
fi
# Add an entry to /etc/fstab for mounting if not already present
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add entry to /etc/fstab\"
fi
# Mount all entries in /etc/fstab
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
# Set permissions on the mount directory
sudo chmod -R 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
sudo cp -f \"$NGINX_OFFICE_FILE\" \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_FILE to $VHOST_OFFICE_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_FILE\"
sudo cp -f \"$NGINX_OFFICE_LOCATION_FILE\" \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_LOCATION_FILE to $VHOST_OFFICE_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_LOCATION_FILE\"
# Start Docker containers using docker-compose
if ! sudo docker compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# Function to add Nextcloud cron job
add_nextcloud_cron() {
echo \"[CRON] Adding Nextcloud cron job...\" >> /dev/null
# Create cron command
CRON_CMD=\"*/5 * * * * sudo docker exec -u www-data $CONTAINER_NAME php cron.php --force\"
# Add to crontab (remove old if exists)
(crontab -l 2>/dev/null | grep -v \"$CONTAINER_NAME\"; echo \"$CRON_CMD\") | crontab -
echo \"[CRON] Nextcloud cron job added successfully!\" >> /dev/null
}
# Function to remove Nextcloud cron job
remove_nextcloud_cron() {
echo \"[CRON] Removing Nextcloud cron job...\" >> /dev/null
# Remove from crontab
crontab -l 2>/dev/null | grep -v \"$CONTAINER_NAME\" | crontab -
echo \"[CRON] Nextcloud cron job removed successfully!\" >> /dev/null
}
# --- Function that installs Nextcloud Office (Collabora) in the background ---
install_nextcloud_office() {
MAX_RETRIES=60
COUNTER=0
# 1) Wait until \"installed: true\" in occ status
while true; do
STATUS_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ status 2>&1)\"
if echo \"$STATUS_OUTPUT\" | grep -q \"installed: true\"; then
echo \"[OfficeSetup] Nextcloud reports installed: true. Proceeding...\" >> \"$STATUS_FILE\"
break
else
echo \"[OfficeSetup] [$COUNTER/$MAX_RETRIES] Nextcloud not fully installed yet, waiting...\" >> \"$STATUS_FILE\"
sleep 2
((COUNTER++))
if [ $COUNTER -ge $MAX_RETRIES ]; then
echo \"[OfficeSetup] Nextcloud did not report 'installed: true' within time limit. Skipping Office install.\" >> \"$STATUS_FILE\"
return
fi
fi
done
# Get the nginx-proxy IP
PROXY_IP=$(get_proxy_ip)
echo \"[OfficeSetup] Detected nginx-proxy IP: $PROXY_IP\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Nextcloud config
echo \"[OfficeSetup] Setting overwrite protocol/host/cli.url in Nextcloud config...\" >> \"$STATUS_FILE\"
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwriteprotocol --value=https 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwritehost --value=\"$DOMAIN\" 2>&1
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwrite.cli.url --value=\"https://$DOMAIN\" 2>&1
# Add the nginx-proxy IP to the trusted_proxies list
echo \"[OfficeSetup] Adding nginx-proxy IP to trusted_proxies...\" >> \"$STATUS_FILE\"
# *** NEW BLOCK *** - Get the IP address of the reverse proxy
sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set trusted_proxies 0 --value=\"$PROXY_IP\" 2>&1
echo \"[OfficeSetup] Installing Nextcloud Office (richdocuments)...\" >> \"$STATUS_FILE\"
# 2) Install the richdocuments app
INSTALL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:install richdocuments 2>&1 || echo \"[OfficeSetup] App already installed\")\"
echo \"[OfficeSetup] app:install richdocuments => $INSTALL_OUTPUT\" >> \"$STATUS_FILE\"
# 3) Set the Collabora Online URL in Nextcloud
WOPI_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_url --value=\"https://office.$DOMAIN/\" 2>&1)\"
echo \"[OfficeSetup] wopi_url => $WOPI_OUTPUT\" >> \"$STATUS_FILE\"
# 4) Enable the app
ENABLE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:enable richdocuments 2>&1)\"
echo \"[OfficeSetup] app:enable richdocuments => $ENABLE_OUTPUT\" >> \"$STATUS_FILE\"
# 5) Allow local remote servers (Fix for Collabora access issues)
ALLOW_LOCAL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set allow_local_remote_servers --value=true --type=bool 2>&1)\"
echo \"[OfficeSetup] allow_local_remote_servers => $ALLOW_LOCAL_OUTPUT\" >> \"$STATUS_FILE\"
# 6) Apply changes by running maintenance repair
REPAIR_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair 2>&1)\"
echo \"[OfficeSetup] maintenance:repair => $REPAIR_OUTPUT\" >> \"$STATUS_FILE\"
# 7) Activate Collabora Online configuration
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ richdocuments:activate-config 2>&1)\"
echo \"[OfficeSetup] richdocuments:activate-config => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
# 8) Refresh cache by scanning all files
SCAN_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ files:scan --all 2>&1)\"
echo \"[OfficeSetup] files:scan --all => $SCAN_OUTPUT\" >> \"$STATUS_FILE\"
# 9) Double-check if the app is enabled
APP_LIST=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:list 2>&1)\"
echo \"[OfficeSetup] occ app:list => $APP_LIST\" >> \"$STATUS_FILE\"
# 10) Perform the migrations
MIGRATION_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ maintenance:repair --include-expensive 2>&1)\"
echo \"[OfficeSetup] maintenance:repair --include-expensive => $MIGRATION_OUTPUT\" >> \"$STATUS_FILE\"
if echo \"$APP_LIST\" | sed -n '/Enabled:/,/Disabled:/p' | grep -q \"richdocuments\"; then
echo \"[OfficeSetup] Nextcloud Office successfully installed and configured!\" >> \"$STATUS_FILE\"
else
echo \"[OfficeSetup] Nextcloud Office installation failed or not enabled.\" >> \"$STATUS_FILE\"
fi
OFFICE_IP_SUBNET=$(get_office_ip)
echo \"[OfficeSetup] Detected office IP: $OFFICE_IP_SUBNET\" >> \"$STATUS_FILE\"
# Write the needed parameters to the Collabora config
# 1) Collabora
ACTIVATE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_allowlist --value=\"$OFFICE_IP_SUBNET\" 2>&1)\"
echo \"[OfficeSetup] richdocuments:wopi_allowlist => $ACTIVATE_OUTPUT\" >> \"$STATUS_FILE\"
# 2) Add Nextcloud cron job
add_nextcloud_cron
}
# Export DOMAIN so it's visible to the function in background
export DOMAIN
export CONTAINER_NAME
# Export the get_proxy_ip function for visibility in nohup
export -f get_proxy_ip
# Export the get_office_ip function for visibility in nohup
export -f get_office_ip
# Export the add_nextcloud_cron function for visibility in nohup
export -f add_nextcloud_cron
# Run the installation in the background
nohup bash -c \"$(
declare -f install_nextcloud_office
echo 'install_nextcloud_office'
)\" > /tmp/office_install.log 2>&1 &
# If everything is successful, update the status file and print success message
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "8b9f1482-cc21-4f7b-aa82-fdb47643d807",
"name": "Service Actions",
"type": "n8n-nodes-base.switch",
"position": [
1640,
140
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "test_connection",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "3afdd2f1-fe93-47c2-95cd-bac9b1d94eeb",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "test_connection"
}
]
},
"renameOutput": true
},
{
"outputKey": "create",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "102f10e9-ec6c-4e63-ba95-0fe6c7dc0bd1",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "create"
}
]
},
"renameOutput": true
},
{
"outputKey": "suspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "f62dfa34-6751-4b34-adcc-3d6ba1b21a8c",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "suspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "unsuspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "384d2026-b753-4c27-94c2-8f4fc189eb5f",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "unsuspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "terminate",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0e190a97-827a-4e87-8222-093ff7048b21",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "terminate"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_package",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "6f7832f3-b61d-4517-ab6b-6007998136dd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_package"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "b27da6f4-859b-4b8a-9542-f0cad5f2cbfc",
"name": "If1",
"type": "n8n-nodes-base.if",
"position": [
920,
320
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "8602bd4c-9693-4d5f-9e7d-5ee62210baca",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "create"
},
{
"id": "1c630b59-0e5a-441d-8aa5-70b31338d897",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_package"
},
{
"id": "b3eb7052-a70f-438e-befd-8c5240df32c7",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "unsuspend"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "0af4d346-d369-412b-b8f1-9847c5deb645",
"name": "Dependent containers Stat",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1300,
1180
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME_ML=\"{{ $('API').item.json.body.domain }}_collabora\"
CONTAINER_NAME_DB=\"{{ $('API').item.json.body.domain }}_db\"
CONTAINER_NAME_REDIS=\"{{ $('API').item.json.body.domain }}_redis\"
# Initialize empty container data
INSPECT_JSON_ML=\"{}\"
STATS_JSON_ML=\"{}\"
INSPECT_JSON_DB=\"{}\"
STATS_JSON_DB=\"{}\"
INSPECT_JSON_REDIS=\"{}\"
STATS_JSON_REDIS=\"{}\"
# Check if container is running
if sudo docker ps -a --filter \"name=$CONTAINER_NAME_ML\" | grep -q \"$CONTAINER_NAME_ML\"; then
# Get Docker inspect info in JSON (as raw string)
INSPECT_JSON_ML=$(sudo docker inspect \"$CONTAINER_NAME_ML\")
# Get Docker stats info in JSON (as raw string)
STATS_JSON_ML=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME_ML\")
STATS_JSON_ML=${STATS_JSON_ML:-'{}'}
fi
# Check if container is running
if sudo docker ps -a --filter \"name=$CONTAINER_NAME_DB\" | grep -q \"$CONTAINER_NAME_DB\"; then
# Get Docker inspect info in JSON (as raw string)
INSPECT_JSON_DB=$(sudo docker inspect \"$CONTAINER_NAME_DB\")
# Get Docker stats info in JSON (as raw string)
STATS_JSON_DB=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME_DB\")
STATS_JSON_DB=${STATS_JSON_DB:-'{}'}
fi
# Check if container is running
if sudo docker ps -a --filter \"name=$CONTAINER_NAME_REDIS\" | grep -q \"$CONTAINER_NAME_REDIS\"; then
# Get Docker inspect info in JSON (as raw string)
INSPECT_JSON_REDIS=$(sudo docker inspect \"$CONTAINER_NAME_REDIS\")
# Get Docker stats info in JSON (as raw string)
STATS_JSON_REDIS=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME_REDIS\")
STATS_JSON_REDIS=${STATS_JSON_REDIS:-'{}'}
fi
# Manually create a combined JSON object
FINAL_JSON=\"{\\"inspect_ml\\": $INSPECT_JSON_ML, \\"stats_ml\\": $STATS_JSON_ML,\\"inspect_db\\": $INSPECT_JSON_DB, \\"stats_db\\": $STATS_JSON_DB,\\"inspect_redis\\": $INSPECT_JSON_REDIS, \\"stats_redis\\": $STATS_JSON_REDIS}\"
# Output the result
echo \"$FINAL_JSON\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "2e53a360-deb3-41e7-8ddd-c06a3733e4bd",
"name": "GET ACL",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1400,
2140
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
# Read files if they exist, else assign empty array
if [[ -f \"$NGINX_MAIN_ACL_FILE\" ]]; then
MAIN_IPS=$(cat \"$NGINX_MAIN_ACL_FILE\" | jq -R -s 'split(\"\n\") | map(select(length > 0))')
else
MAIN_IPS=\"[]\"
fi
# Output JSON
echo \"{ \\"main_ips\\": $MAIN_IPS}\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "ec319d39-328f-4af6-a6d1-23ba6efb11d2",
"name": "SET ACL",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1400,
2320
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
NGINX_MAIN_ACL_TEXT=\"{{ $('API').item.json.body.main_ips }}\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
update_nginx_acl() {
ACL_FILE=$1
LOCATION_FILE=$2
if [ -s \"$ACL_FILE\" ]; then
VALID_LINES=$(grep -vE '^\s*$' \"$ACL_FILE\")
if [ -n \"$VALID_LINES\" ]; then
while IFS= read -r line; do
echo \"allow $line;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
done <<< \"$VALID_LINES\"
echo \"deny all;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
fi
fi
}
# Create or overwrite the file with the content from variables
echo \"$NGINX_MAIN_ACL_TEXT\" | sudo tee \"$NGINX_MAIN_ACL_FILE\" > /dev/null
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
update_nginx_acl \"$NGINX_MAIN_ACL_FILE\" \"$VHOST_MAIN_LOCATION_FILE\"
# Reload Nginx with sudo
if sudo docker exec nginx-proxy nginx -s reload; then
echo \"success\"
else
handle_error \"Failed to reload Nginx.\"
fi
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "a80322f9-1d95-4d09-b659-e98cfd31ed4b",
"name": "GET NET",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1400,
2460
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
NET_IN_FILE=\"$COMPOSE_DIR/net_in\"
NET_OUT_FILE=\"$COMPOSE_DIR/net_out\"
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
# Get current network statistics from container
STATS=$(sudo docker exec \"$CONTAINER_NAME\" cat /proc/net/dev | grep eth0) || handle_error \"Failed to get network stats\"
NET_IN_NEW=$(echo \"$STATS\" | awk '{print $2}') # RX bytes (received)
NET_OUT_NEW=$(echo \"$STATS\" | awk '{print $10}') # TX bytes (transmitted)
# Ensure directory exists
mkdir -p \"$COMPOSE_DIR\"
# Read old values, create files if they don't exist
if [[ -f \"$NET_IN_FILE\" ]]; then
NET_IN_OLD=$(sudo cat \"$NET_IN_FILE\")
else
NET_IN_OLD=0
fi
if [[ -f \"$NET_OUT_FILE\" ]]; then
NET_OUT_OLD=$(sudo cat \"$NET_OUT_FILE\")
else
NET_OUT_OLD=0
fi
# Save new values
echo \"$NET_IN_NEW\" | sudo tee \"$NET_IN_FILE\" > /dev/null
echo \"$NET_OUT_NEW\" | sudo tee \"$NET_OUT_FILE\" > /dev/null
# Output JSON
echo \"{ \\"net_in_new\\": $NET_IN_NEW, \\"net_out_new\\": $NET_OUT_NEW, \\"net_in_old\\": $NET_IN_OLD, \\"net_out_old\\": $NET_OUT_OLD }\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "3158fb78-50c5-4ee2-b9ff-34947867457d",
"name": "If2",
"type": "n8n-nodes-base.if",
"position": [
3240,
-320
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "ac3730e4-8776-486b-b393-60ef103d35ea",
"operator": {
"type": "string",
"operation": "notEquals"
},
"leftValue": "={{ $('Split domain').item.json.mainDomain }}",
"rightValue": "d01-test.uuq.pl"
},
{
"id": "5baca1f0-fa26-4b78-ae94-44b876ac4fee",
"operator": {
"type": "string",
"operation": "notEquals"
},
"leftValue": "={{ $('Split domain').item.json.mainDomain }}",
"rightValue": "d02-test.uuq.pl"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "8076c40a-793b-4831-af1d-6afe0bb46f35",
"name": "Split domain",
"type": "n8n-nodes-base.code",
"position": [
2740,
-320
],
"parameters": {
"jsCode": "const domain = $('API').item.json.body.domain;
const parts = domain.split('.');
let subDomain = '';
let mainDomain = domain;
if (parts.length > 2) {
subDomain = parts[0];
mainDomain = parts.slice(1).join('.');
}
return {
json: {
subDomain: subDomain,
mainDomain: mainDomain
}
};
"
},
"typeVersion": 2
},
{
"id": "042d1043-a64e-4aa9-84b5-fa4f47591542",
"name": "DNS Service Actions",
"type": "n8n-nodes-base.switch",
"position": [
3640,
-400
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "container_update_dns_record",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "8ac3b338-9407-4c8b-8e88-935cb017fbbe",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_update_dns_record"
}
]
},
"renameOutput": true
},
{
"outputKey": "create",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "102f10e9-ec6c-4e63-ba95-0fe6c7dc0bd1",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "create"
}
]
},
"renameOutput": true
},
{
"outputKey": "suspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "f62dfa34-6751-4b34-adcc-3d6ba1b21a8c",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "suspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "unsuspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "384d2026-b753-4c27-94c2-8f4fc189eb5f",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "unsuspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "terminate",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0e190a97-827a-4e87-8222-093ff7048b21",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "terminate"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_package",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "6f7832f3-b61d-4517-ab6b-6007998136dd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_package"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "38117189-c233-40c3-8dd0-67d94f4e868a",
"name": "DNS Parametrs",
"type": "n8n-nodes-base.set",
"position": [
3000,
-320
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "a6328600-7ee0-4031-9bdb-fcee99b79658",
"name": "api_url",
"type": "string",
"value": "https://your.pdns.url"
},
{
"id": "370ddc4e-0fc0-48f6-9b30-ebdfba72c62f",
"name": "api_key",
"type": "string",
"value": "your_api_key"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "f9cf3c3e-83d0-46ea-983b-7f536ae8356d",
"name": "Add record",
"type": "n8n-nodes-base.httpRequest",
"onError": "continueRegularOutput",
"position": [
4000,
-440
],
"parameters": {
"url": "={{ $('DNS Parametrs').item.json.api_url }}/api/v1/servers/localhost/zones/{{ $('Split domain').item.json.mainDomain }}",
"body": "={
\"rrsets\": [
{
\"name\": \"{{ $('API').item.json.body.domain }}.\",
\"type\": \"CNAME\",
\"changetype\": \"REPLACE\",
\"ttl\": 300,
\"records\": [
{
\"content\": \"{{ $('API').item.json.body.server_domain }}.\",
\"disabled\": false
}
]
}
]
}
",
"method": "PATCH",
"options": {},
"sendBody": true,
"contentType": "raw",
"sendHeaders": true,
"rawContentType": "application/json",
"headerParameters": {
"parameters": [
{
"name": "X-API-Key",
"value": "={{ $('DNS Parametrs').item.json.api_key }}"
},
{
"name": "Content-Type",
"value": "application/json"
}
]
}
},
"typeVersion": 4.2,
"alwaysOutputData": true
},
{
"id": "f50c9fd0-efaa-42b3-aa03-ff66ca400299",
"name": "Delete record",
"type": "n8n-nodes-base.httpRequest",
"onError": "continueRegularOutput",
"position": [
4000,
-280
],
"parameters": {
"url": "={{ $('DNS Parametrs').item.json.api_url }}/api/v1/servers/localhost/zones/{{ $('Split domain').item.json.mainDomain }}",
"body": "={
\"rrsets\": [
{
\"name\": \"{{ $('API').item.json.body.domain }}.\",
\"type\": \"CNAME\",
\"changetype\": \"REPLACE\",
\"ttl\": 300,
\"records\": []
}
]
}
",
"method": "PATCH",
"options": {},
"sendBody": true,
"contentType": "raw",
"sendHeaders": true,
"rawContentType": "application/json",
"headerParameters": {
"parameters": [
{
"name": "X-API-Key",
"value": "={{ $('DNS Parametrs').item.json.api_key }}"
},
{
"name": "Content-Type",
"value": "application/json"
}
]
}
},
"typeVersion": 4.2,
"alwaysOutputData": true
},
{
"id": "d1823b67-662c-4bab-815b-0dda8da8284e",
"name": "API answer1",
"type": "n8n-nodes-base.respondToWebhook",
"onError": "continueRegularOutput",
"position": [
4000,
-580
],
"parameters": {
"options": {
"responseCode": 200
},
"respondWith": "json",
"responseBody": "{
\"status\": \"success\",
\"message\": \"\",
\"data\": \"\"
}
"
},
"typeVersion": 1.1,
"alwaysOutputData": true
},
{
"id": "446fdfae-adf1-45c1-b237-705a235e735a",
"name": "d01-test.uuq.pl",
"type": "n8n-nodes-base.ssh",
"onError": "continueErrorOutput",
"position": [
2900,
1560
],
"parameters": {
"cwd": "=/",
"command": "={{ $json.sh }}"
},
"credentials": {
"sshPassword": {
"id": "AxPODSmAvTNzqrJb",
"name": "SSH puq on d01-test.uuq.pl"
}
},
"executeOnce": true,
"typeVersion": 1
},
{
"id": "0a52c410-c82a-4cb6-872d-3dd328224db0",
"name": "d02-test.uuq.pl",
"type": "n8n-nodes-base.ssh",
"onError": "continueErrorOutput",
"position": [
2900,
1840
],
"parameters": {
"cwd": "=/",
"command": "={{ $json.sh }}"
},
"credentials": {
"sshPassword": {
"id": "JseVEj5f5icL4csj",
"name": "d02-test.uuq.pl"
}
},
"executeOnce": true,
"typeVersion": 1
},
{
"id": "9721b93a-4b20-4ea6-965e-44277613edee",
"name": "Servers Switch",
"type": "n8n-nodes-base.switch",
"position": [
2560,
1700
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "d01-test.uuq.pl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "d01-test.uuq.pl"
}
]
},
"renameOutput": true
},
{
"outputKey": "d02-test.uuq.pl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "a032f373-4856-4b2d-b722-9a3ad36d12e7",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "d02-test.uuq.pl"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "84d45b2c-e8bc-4a68-9c6a-051e12451c48",
"name": "Code",
"type": "n8n-nodes-base.code",
"position": [
3400,
1740
],
"parameters": {
"mode": "runOnceForEachItem",
"jsCode": "try {
if ($json.stdout === 'success') {
return {
json: {
status: 'success',
message: '',
data: '',
}
};
}
const parsedData = JSON.parse($json.stdout);
return {
json: {
status: parsedData.status === 'error' ? 'error' : 'success',
message: parsedData.message || (parsedData.status === 'error' ? 'An error occurred' : ''),
data: parsedData || '',
}
};
} catch (error) {
return {
json: {
status: 'error',
message: $json.stdout??$json.error,
data: '',
}
};
}"
},
"executeOnce": false,
"retryOnFail": false,
"typeVersion": 2,
"alwaysOutputData": false
},
{
"id": "d7e8d93c-0292-44ef-ac21-8c934d60a750",
"name": "API answer2",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
3800,
1740
],
"parameters": {
"options": {
"responseCode": 200
},
"respondWith": "allIncomingItems"
},
"typeVersion": 1.1,
"alwaysOutputData": true
}
],
"active": true,
"pinData": {},
"settings": {
"callerPolicy": "workflowsFromSameOwner",
"executionOrder": "v1",
"saveManualExecutions": true,
"saveExecutionProgress": true,
"saveDataErrorExecution": "all",
"saveDataSuccessExecution": "all"
},
"versionId": "db430021-ac5c-4b7d-8512-8a6f04dc4952",
"connections": {
"If": {
"main": [
[
{
"node": "Container Stats",
"type": "main",
"index": 0
},
{
"node": "Container Actions",
"type": "main",
"index": 0
},
{
"node": "NextCloud",
"type": "main",
"index": 0
},
{
"node": "If1",
"type": "main",
"index": 0
}
],
[
{
"node": "422-Invalid server domain",
"type": "main",
"index": 0
}
]
]
},
"API": {
"main": [
[
{
"node": "Parametrs",
"type": "main",
"index": 0
}
]
]
},
"If1": {
"main": [
[
{
"node": "nginx",
"type": "main",
"index": 0
}
],
[
{
"node": "Service Actions",
"type": "main",
"index": 0
}
]
]
},
"If2": {
"main": [
[
{
"node": "DNS Service Actions",
"type": "main",
"index": 0
}
]
]
},
"Log": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Code": {
"main": [
[
{
"node": "API answer2",
"type": "main",
"index": 0
}
]
]
},
"Stat": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Stop": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Start": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Users": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"nginx": {
"main": [
[
{
"node": "Deploy-docker-compose",
"type": "main",
"index": 0
}
]
]
},
"Deploy": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"GET ACL": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"GET NET": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Inspect": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"SET ACL": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Suspend": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Version": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"NextCloud": {
"main": [
[
{
"node": "Version",
"type": "main",
"index": 0
}
],
[
{
"node": "Users",
"type": "main",
"index": 0
}
],
[
{
"node": "Change Password",
"type": "main",
"index": 0
}
]
]
},
"Parametrs": {
"main": [
[
{
"node": "If",
"type": "main",
"index": 0
}
]
]
},
"Unsuspend": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Mount Disk": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Terminated": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Split domain": {
"main": [
[
{
"node": "DNS Parametrs",
"type": "main",
"index": 0
}
]
]
},
"Unmount Disk": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"ChangePackage": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"DNS Parametrs": {
"main": [
[
{
"node": "If2",
"type": "main",
"index": 0
}
]
]
},
"Servers Switch": {
"main": [
[
{
"node": "d01-test.uuq.pl",
"type": "main",
"index": 0
}
],
[
{
"node": "d02-test.uuq.pl",
"type": "main",
"index": 0
}
]
]
},
"Change Password": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"Container Stats": {
"main": [
[
{
"node": "Inspect",
"type": "main",
"index": 0
}
],
[
{
"node": "Stat",
"type": "main",
"index": 0
}
],
[
{
"node": "Log",
"type": "main",
"index": 0
}
],
[
{
"node": "Dependent containers Stat",
"type": "main",
"index": 0
}
],
[
{
"node": "Split domain",
"type": "main",
"index": 0
}
]
]
},
"Service Actions": {
"main": [
[
{
"node": "Test Connection",
"type": "main",
"index": 0
}
],
[
{
"node": "Deploy",
"type": "main",
"index": 0
},
{
"node": "Split domain",
"type": "main",
"index": 0
}
],
[
{
"node": "Suspend",
"type": "main",
"index": 0
}
],
[
{
"node": "Unsuspend",
"type": "main",
"index": 0
}
],
[
{
"node": "Terminated",
"type": "main",
"index": 0
},
{
"node": "Split domain",
"type": "main",
"index": 0
}
],
[
{
"node": "ChangePackage",
"type": "main",
"index": 0
},
{
"node": "Split domain",
"type": "main",
"index": 0
}
]
]
},
"Test Connection": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
},
"d01-test.uuq.pl": {
"main": [
[
{
"node": "Code",
"type": "main",
"index": 0
}
],
[
{
"node": "Code",
"type": "main",
"index": 0
}
]
]
},
"d02-test.uuq.pl": {
"main": [
[
{
"node": "Code",
"type": "main",
"index": 0
}
],
[
{
"node": "Code",
"type": "main",
"index": 0
}
]
]
},
"Container Actions": {
"main": [
[
{
"node": "Start",
"type": "main",
"index": 0
}
],
[
{
"node": "Stop",
"type": "main",
"index": 0
}
],
[
{
"node": "Mount Disk",
"type": "main",
"index": 0
}
],
[
{
"node": "Unmount Disk",
"type": "main",
"index": 0
}
],
[
{
"node": "GET ACL",
"type": "main",
"index": 0
}
],
[
{
"node": "SET ACL",
"type": "main",
"index": 0
}
],
[
{
"node": "GET NET",
"type": "main",
"index": 0
}
]
]
},
"DNS Service Actions": {
"main": [
[
{
"node": "Add record",
"type": "main",
"index": 0
},
{
"node": "API answer1",
"type": "main",
"index": 0
}
],
[
{
"node": "Add record",
"type": "main",
"index": 0
}
],
[
{
"node": "Delete record",
"type": "main",
"index": 0
}
],
[
{
"node": "Add record",
"type": "main",
"index": 0
}
],
[
{
"node": "Delete record",
"type": "main",
"index": 0
}
],
[
{
"node": "Add record",
"type": "main",
"index": 0
}
]
]
},
"Deploy-docker-compose": {
"main": [
[
{
"node": "Service Actions",
"type": "main",
"index": 0
}
]
]
},
"Dependent containers Stat": {
"main": [
[
{
"node": "Servers Switch",
"type": "main",
"index": 0
}
]
]
}
}
}
功能特点
- 通过 Webhook API 接收部署与管理指令
- Docker 容器全生命周期管理(创建、暂停、恢复、终止、更换套餐)
- 通过 SSH 在多台目标服务器(如 d01/d02-test.uuq.pl)上执行脚本
- 自动在 PowerDNS 中添加/删除域名 CNAME 记录
- 采集容器网络流量等运行统计数据
技术分析
节点类型及作用
- Webhook(API 入口)
- If / Switch(条件判断与命令路由)
- Set(参数配置)
- Code(域名拆分、脚本输出解析)
- SSH(远程执行部署脚本)
- HTTP Request(调用 PowerDNS API)
- RespondToWebhook(返回 API 响应)
复杂度评估
配置难度:较高(需准备多台服务器的 SSH 凭证与 PowerDNS API 密钥)
维护难度:中等(节点数量多,但按命令分支组织,结构清晰)
扩展性:较好(可按现有 Switch 分支模式增加服务器或命令)
实施指南
前置条件
- n8n平台访问权限
- 目标服务器的SSH凭证(服务器需已安装Docker)
- PowerDNS API地址及API Key
- 能够调用Webhook的上游系统(如PUQ WHMCS模块)
配置步骤
- 在n8n中导入工作流JSON文件
- 为各服务器SSH节点配置认证信息
- 在"DNS Parametrs"节点中填写PowerDNS的api_url与api_key
- 将示例域名(d01/d02-test.uuq.pl)替换为实际服务器域名
- 通过POST请求测试webhook的各命令分支
- 激活工作流(可选)
关键参数
| 参数名称 | 默认值 | 说明 |
|---|---|---|
| clients_dir | /opt/docker/clients | 客户容器部署目录 |
| mount_dir | /mnt | 磁盘挂载目录 |
| api_url | https://your.pdns.url | PowerDNS API地址 |
| api_key | your_api_key | PowerDNS API密钥 |
最佳实践
优化建议
- 将服务器域名等环境差异集中在参数节点中统一维护
- 新增服务器时复用现有的Switch分支与SSH节点模式
- 为耗时较长的部署脚本设置合理的SSH执行超时
- 定期清理不再使用的DNS记录与客户容器目录
安全注意事项
- 妥善保管SSH凭证与PowerDNS API密钥
- 限制webhook的访问来源(如IP白名单或请求鉴权)
- 定期审查部署与DNS变更的执行日志
- 避免将真实的api_key等敏感信息提交到版本库或模板中
性能优化
- 精简SSH脚本,减少不必要的远程调用
- 缓存频繁访问的数据
- 将不同服务器的部署任务分发到各自的SSH节点执行
- 监控目标服务器的资源使用情况
故障排除
常见问题
DNS记录未生效
检查"DNS Parametrs"节点中的api_url与api_key配置,并确认目标域名所在zone在PowerDNS中存在、CNAME记录已写入。
SSH连接失败
确认SSH凭证有效、目标服务器网络可达,且执行用户具有脚本所需的sudo权限。
调试技巧
- 启用详细日志记录查看每个步骤的执行情况
- 使用测试POST请求逐个验证webhook命令分支
- 检查网络连接和PowerDNS API服务状态
- 逐步执行工作流定位问题节点
错误处理
工作流包含以下错误处理机制:
- SSH节点出错时走独立的错误输出分支(onError: continueErrorOutput),错误同样汇入统一的结果解析
- DNS相关HTTP请求失败时继续常规输出(onError: continueRegularOutput),不中断整体流程
- Code节点通过try/catch解析脚本输出,解析失败时返回统一的error状态JSON
- 无效的server_domain会被If节点拦截并返回422错误响应